OpenCV learning series: object tracking

/*
1. [OpenCV] Optical flow test
*/
//  Description: headers and namespaces used by the program
#include <iostream>
#include "ctype.h"
#include "stdio.h"
#include "stdlib.h"
#include <assert.h>

#include <opencv2/opencv.hpp>  // main OpenCV header
#include <opencv2/xfeatures2d.hpp> 
#include <opencv2/features2d.hpp>
#include <opencv2/core/utility.hpp>  
#include <opencv2/tracking.hpp>  
#include <opencv2/videoio.hpp>  
#include <opencv2/highgui.hpp>  
#include <cstring>  
#include <vector> 

using namespace cv; 
using namespace std;

// Optical flow tracking module. Parameters: frame - input video frame; output - video frame with tracking results
Mat gray;       // current grayscale frame (region being tracked)
Mat gray_prev;  // previous grayscale frame
vector<Point2f> points[2];  // points[0]: original feature positions; points[1]: new feature positions
vector<Point2f> initial;    // initial positions of the tracked points
// Description: function declarations
static void help_information(); // print program info and the OpenCV version
void tracking(Mat &frame, Mat &output);


int main()
{
    // Print some program information
    help_information();


    Mat frame;  // frame used for tracking
    Mat result; // tracking result

    // Load the video file (place it under the program's working directory)
    //VideoCapture capture("../libo_output/output8.avi");
    VideoCapture capture(0);
    // Check whether the camera/video opened successfully
    if (!capture.isOpened())
    {
        std::cout << "Could not open the input video." << std::endl;
        return -1; // nothing to do without a valid capture source
    }
    while (true)
    {
        capture >> frame;
        if (frame.empty())
        {
            std::cout << " < < <  End of the video  > > > " << std::endl;
            break;
        }
        tracking(frame, result); // reaching here guarantees a non-empty frame
        int c = waitKey(50);
        if ((char)c == 27)
        {
            break;
        }
    }




    return 0;
}


//********************************************************* User-defined functions *******************************//

// Help information
static void help_information()
{
    cout << "\n\t\t\t Author: 孙立波\n"
        << "\t\t\t Moving-object detection and tracking with optical flow\n"
        << "\t\t\t Current OpenCV version: " << CV_VERSION
        << "\n\n";
}


// Optical flow tracking module. Parameters: frame - input video frame; output - video frame with tracking results
void tracking(Mat &frame, Mat &output)
{
    cvtColor(frame, gray, COLOR_BGR2GRAY);
    frame.copyTo(output);
    // Add feature points
    vector<Point2f> features;   // detected features
    int maxCount = 500;         // maximum number of features to detect
    double qLevel = 0.01;       // quality level for feature detection
    double minDist = 10.0;      // minimum distance between two feature points
    vector<uchar> status;       // tracking status: 1 if the flow for the feature was found, 0 otherwise
    vector<float> err;
    if (points[0].size() <= 10)
    {
        goodFeaturesToTrack(gray, features, maxCount, qLevel, minDist);
        points[0].insert(points[0].end(), features.begin(), features.end());
        //initial.insert(initial.end(), features.begin(), features.end());
    }

    if (gray_prev.empty())
    {
        gray.copyTo(gray_prev);
    }
    // Lucas-Kanade optical flow motion estimation
    calcOpticalFlowPyrLK(gray_prev, gray, points[0], points[1], status, err);
    // Remove bad feature points
    int k = 0;
    for (size_t i = 0; i<points[1].size(); i++)
    {
        if (status[i] && (abs(points[0][i].x - points[1][i].x) + abs(points[0][i].y - points[1][i].y)) > 10) // keep points whose flow was found and that moved more than 10 px
        {
            //initial[k++] = initial[i];
            points[1][k++] = points[1][i];
        }
    }
    points[1].resize(k);
    initial.resize(k);
    // Draw feature points and motion trajectories
    for (size_t i = 0; i<points[1].size(); i++)
    {
        //line(output, initial[i], points[1][i], Scalar(0, 0, 255));
        circle(output, points[1][i], 3, Scalar(0, 255, 0), -1);
    }

    // Use the current result as the reference for the next frame
    swap(points[1], points[0]);
    swap(gray_prev, gray);
    imshow("optical flow tracking", output);
}




















/*
// Computes the optical flow for a sparse feature set using the iterative Lucas-Kanade method with pyramids.
// Pyramidal Lucas-Kanade tracking works as follows: compute the optical flow at the top level of the image pyramid, use the resulting motion estimate as the starting point for the next level down, and repeat until the bottom level is reached. This minimizes the chance of violating the motion assumptions, so faster and larger motions can be tracked.
void cv::calcOpticalFlowPyrLK( InputArray _prevImg, InputArray _nextImg,
InputArray _prevPts, InputOutputArray _nextPts,
OutputArray _status, OutputArray _err,
Size winSize, int maxLevel,
TermCriteria criteria,
int flags, double minEigThreshold )
// Parameters:
// _prevImg: previous 8-bit frame or pyramid
// _nextImg: next 8-bit frame or pyramid
// _prevPts: feature points in the previous frame
// _nextPts: positions of the feature points in the next frame, computed and written as output
// _status: output status vector; each element is set to 1 if the flow for the corresponding feature was found, 0 otherwise
// _err: output error vector; the difference between the patch around each tracked point in the first image and the patch at its position in the second image
// winSize: search window size at each pyramid level
// maxLevel: maximal pyramid level number; 0 means pyramids are not used (single level)
// criteria: termination criteria of the iterative search algorithm
// flags: indicates, e.g., whether the pyramids for the previous/next frame were prepared before the call
// minEigThreshold: minimum eigenvalue of the 2x2 normal matrix of the optical flow equations

/// Other optical flow algorithms (minimal usage sketches follow this comment block)
1) calcOpticalFlowPyrLK
Computes the optical flow for a sparse point set (sparse flow) using the pyramidal Lucas-Kanade method. For background, see the paper "Pyramidal Implementation of the Lucas Kanade Feature Tracker: Description of the Algorithm".
2) calcOpticalFlowFarneback
Computes dense optical flow (flow for every pixel of the image) using Gunnar Farneback's algorithm. Related paper: "Two-Frame Motion Estimation Based on Polynomial Expansion".
3) CalcOpticalFlowBM
Computes optical flow by block matching.
4) CalcOpticalFlowHS
Computes dense optical flow using the Horn-Schunck algorithm. Related paper: "Determining Optical Flow".
5) calcOpticalFlowSF
// Mainly used to control the termination conditions of iterative algorithms
class CV_EXPORTS TermCriteria
{
public:
enum Type           // type enumeration
{
COUNT=1,        // terminate after the maximum number of iterations
MAX_ITER=COUNT, // same as COUNT
EPS=2           // terminate when a convergence threshold is reached
};

// constructors
TermCriteria();
TermCriteria(int type, int maxCount, double epsilon);

int type;         // type: maximum iterations, convergence threshold, or both
int maxCount;     // maximum number of iterations
double epsilon;   // convergence threshold for termination
};
// Purpose: refine corner locations to sub-pixel accuracy after corner detection.
void cv::cornerSubPix( InputArray _image, InputOutputArray _corners,
Size winSize, Size zeroZone,
TermCriteria criteria )
// Parameters:
// _image: input image
// _corners: initial corner coordinates on input, refined coordinates on output
// winSize: half of the side length of the search window; winSize = Size(5, 5) gives an 11 x 11 search window
// zeroZone: half of the side length of the dead region in the middle of the search window, used to avoid singularities of the autocorrelation matrix; (-1, -1) means there is no such region
// criteria: termination criteria for the iterative corner refinement

// Finds the corners with the largest eigenvalues in the image
void cv::goodFeaturesToTrack( InputArray _image, OutputArray _corners,
int maxCorners, double qualityLevel, double minDistance,
InputArray _mask, int blockSize,
bool useHarrisDetector, double harrisK )
// Parameters:
// _image: input image, 8-bit or 32-bit floating point, single channel
// _corners: output feature points; each element is the position of one feature point
// maxCorners: maximum number of feature points to return
// qualityLevel: multiplier on the best corner's quality measure; defines the minimum accepted quality of image corners
// minDistance: minimum possible distance between returned feature points
// _mask: region of interest; if empty, the whole image is used
// blockSize: size of the neighborhood used when computing the derivative autocorrelation matrix; a small window gives better results than a single point
// useHarrisDetector: flag; nonzero uses the Harris corner measure, zero uses the Shi-Tomasi measure
// harrisK: free parameter of the Harris detector

*/
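To make the reference notes above concrete, here is a minimal sketch (not part of the original program) that ties goodFeaturesToTrack, cornerSubPix, and calcOpticalFlowPyrLK together. `prev` and `next` are hypothetical placeholders for two consecutive 8-bit grayscale frames, and the window sizes and termination criteria are illustrative values, not prescribed ones.

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

// Sketch: detect corners in `prev`, refine them to sub-pixel accuracy,
// then track them into `next` with pyramidal LK.
void sparse_lk_sketch(const cv::Mat& prev, const cv::Mat& next)
{
    std::vector<cv::Point2f> prevPts, nextPts;
    // Up to 500 Shi-Tomasi corners, quality factor 0.01, min spacing 10 px
    cv::goodFeaturesToTrack(prev, prevPts, 500, 0.01, 10.0);
    if (prevPts.empty()) return;

    // winSize = Size(5, 5) means an 11 x 11 search window (see notes above);
    // stop after 40 iterations or when the refinement shift drops below 0.01
    cv::cornerSubPix(prev, prevPts, cv::Size(5, 5), cv::Size(-1, -1),
        cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 40, 0.01));

    std::vector<uchar> status;
    std::vector<float> err;
    // 21 x 21 window per pyramid level, 3 pyramid levels
    cv::calcOpticalFlowPyrLK(prev, next, prevPts, nextPts, status, err,
        cv::Size(21, 21), 3,
        cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, 0.01));

    size_t kept = 0;
    for (size_t i = 0; i < status.size(); i++)
        if (status[i]) kept++;   // status[i] == 1 means the flow was found
    std::cout << kept << " of " << prevPts.size() << " points tracked" << std::endl;
}

For the dense alternative listed above, a minimal calcOpticalFlowFarneback sketch; the parameter values are commonly used illustrative choices, not the only valid ones:

// `flow` becomes a 2-channel float image: flow.at<cv::Point2f>(y, x)
// is the displacement of pixel (x, y) from `prev` to `next`.
cv::Mat flow;
cv::calcOpticalFlowFarneback(prev, next, flow,
    0.5,   // pyr_scale: each pyramid level halves the resolution
    3,     // levels: number of pyramid levels
    15,    // winsize: averaging window size
    3,     // iterations at each pyramid level
    5,     // poly_n: neighborhood size for the polynomial expansion
    1.2,   // poly_sigma: Gaussian sigma for the expansion
    0);    // flags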



/*
2. [OpenCV] Optical flow test 2
*/
//  Description: headers and namespaces used by the program
#include <iostream>
#include "ctype.h"
#include "stdio.h"
#include "stdlib.h"
#include <assert.h>

#include <opencv2/opencv.hpp>  // main OpenCV header
#include <opencv2/xfeatures2d.hpp> 
#include <opencv2/features2d.hpp>
#include <opencv2/core/utility.hpp>  
#include <opencv2/tracking.hpp>  
#include <opencv2/videoio.hpp>  
#include <opencv2/highgui.hpp>  
#include <cstring>  
#include <vector> 

using namespace cv; 
using namespace std;

// Detection module
void match_by_descriptors(cv::Mat& src, cv::Mat& dest, std::vector<cv::Point2f>& prevPts, std::vector<cv::Point2f> &currPts, std::vector<cv::DMatch> &matches);

void match_by_opticalflow(cv::Mat& src, cv::Mat& dest, std::vector<cv::Point2f>& prevPts, std::vector<cv::Point2f> &currPts, std::vector<cv::DMatch> &matches);
void corners_separate(const std::vector<cv::KeyPoint> &in, std::vector<cv::KeyPoint> &out, int img_cols, int img_rows, int n);
bool compare_response(cv::KeyPoint x, cv::KeyPoint y) { return (x.response > y.response); }

void get_better_contours(std::vector<std::vector<cv::Point>> &contours);
// Image differencing: black borders produced by the warp stay black after differencing
void get_diff(const cv::Mat& ref, const cv::Mat& warp, cv::Mat& diff);
// Optical flow tracking module. Parameters: frame - input video frame; output - video frame with tracking results
Mat gray;       // current grayscale frame (region being tracked)
Mat gray_prev;  // previous grayscale frame
vector<Point2f> points[2];  // points[0]: original feature positions; points[1]: new feature positions
vector<Point2f> initial;    // initial positions of the tracked points
// Description: function declarations
static void help_information(); // print program info and the OpenCV version
void tracking(Mat &frame, Mat &output);


int main()
{
    // Print some program information
    help_information();

    //
    cv::Mat frameCurrent;
    cv::Mat frameReference;
    cv::Mat warpedCurrent;

    cv::Mat gray_frameCurrent;
    cv::Mat gray_frameReference;
    cv::Mat gray_warpedCurrent;
    cv::Mat gray_diff;
    int frameNum = 0;
    bool flag = false; // flag for entering the tracking stage (never set to true in this demo, so the tracking loop below is skipped)
    // Load the video file
    cv::VideoCapture capture("../libo_output/output3.avi");
    // Check whether the video opened successfully
    if (!capture.isOpened())
    {
        std::cout << "Could not open the input video." << std::endl;
        return -1; // nothing to do without a valid capture source
    }
    //get first image
    capture >> frameCurrent;
    frameNum++;
    std::cout << "#Frame: " << frameNum<< std::endl;
    frameReference = frameCurrent.clone();
    //GaussianBlur denoise
    cv::GaussianBlur(frameReference, frameReference, Size(3, 3), 0);
    // Convert to grayscale (frames from VideoCapture are 3-channel BGR, so COLOR_BGR2GRAY, not CV_RGBA2GRAY)
    cv::cvtColor(frameReference, gray_frameReference, cv::COLOR_BGR2GRAY);
    while (true)
    {
        capture >> frameCurrent;
        //capture >> frameCurrent;
        if (frameCurrent.empty())
        {
            std::cout << " < < <  End of the video  > > > " << std::endl;
            break;
        }
        frameNum++;
        std::cout << "#Frame: " << frameNum << std::endl;
        //GaussianBlur denoise
        cv::GaussianBlur(frameCurrent, frameCurrent, Size(3, 3), 0);
        // Convert to grayscale
        cv::cvtColor(frameCurrent, gray_frameCurrent, cv::COLOR_BGR2GRAY);
        std::vector<cv::Point2f> reference_pts, current_pts;
        std::vector<cv::DMatch> matches;
        //match_by_descriptors(gray_frameReference, gray_frameCurrent, reference_pts, current_pts, matches);
        match_by_opticalflow(gray_frameReference, gray_frameCurrent, reference_pts, current_pts, matches); // predict next-frame corners with optical flow and keep only good common matches
        // Estimate the 3x3 projective mapping (homography) from image 1 to image 2
        Mat homoMatrix;
        if (matches.size() >= 4)
            homoMatrix = findHomography(reference_pts, current_pts, cv::RANSAC);
        if (matches.size() < 4 || homoMatrix.empty())
        {
            std::cerr << "Can't find Homography!" << std::endl;
            gray_frameReference = gray_frameCurrent.clone(); // advance the reference frame
            continue; // skip this frame: the warp/diff below needs a valid homography
        }
        //! convert matched points to keypoints for drawing (note: the RANSAC inlier mask is not retrieved, so all matches are drawn)
        std::vector<cv::KeyPoint> inliers_reference;
        std::vector<cv::KeyPoint> inliers_current;
        std::vector<cv::DMatch> inlier_matches;
        for (unsigned i = 0; i < current_pts.size(); i++)
        {
                int new_i = static_cast<int>(inliers_reference.size());
                cv::KeyPoint kp1, kp2;
                kp1.pt = reference_pts[i];
                kp2.pt = current_pts[i];
                inliers_reference.push_back(kp1);
                inliers_current.push_back(kp2);
                inlier_matches.push_back(cv::DMatch(new_i, new_i, 0));
        }
        cv::Mat frame_match;
        cv::drawMatches(gray_frameReference, inliers_reference, gray_frameCurrent, inliers_current,
            inlier_matches, frame_match,
            cv::Scalar(255, 255, 0), cv::Scalar(0, 255,255));
        cv::imshow("配准显示", frame_match);
        cv::warpPerspective(gray_frameReference, gray_warpedCurrent, homoMatrix, cv::Size(gray_frameCurrent.cols, gray_frameCurrent.rows));
        absdiff(gray_frameCurrent, gray_warpedCurrent, gray_diff);

        threshold(gray_diff, gray_diff, 20, 255.0, cv::THRESH_BINARY); // optionally add | cv::THRESH_OTSU
        medianBlur(gray_diff, gray_diff, 3);
        // Median filtering followed by morphological dilation
        cv::Mat se = getStructuringElement(cv::MORPH_RECT, cv::Size(9, 9));
        morphologyEx(gray_diff, gray_diff, cv::MORPH_DILATE, se);
        cv::Mat se1 = getStructuringElement(cv::MORPH_RECT, cv::Size(7, 7));
        morphologyEx(gray_diff, gray_diff, cv::MORPH_DILATE, se1); // use the 7x7 element; the original reused se here, leaving se1 unused
        cv::imshow("差分显示", gray_diff);

        std::vector<std::vector<cv::Point>> contours;
        cv::findContours(gray_diff, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);
        //! remove bad contours
        get_better_contours(contours);
        Mat image02temp = frameCurrent; // note: shallow copy, so drawing below also marks frameCurrent
        for (int k = 0; k < contours.size(); k++)
        {


            rectangle(image02temp, Rect(20, 20, 600, 440), Scalar(255, 255, 255), 2, 8, 0);
            Rect bomen = boundingRect(contours[k]);
            // Skip detections in the border region, which contains invalid pixels introduced by registration
            if ((bomen.x > 20) &&
                (bomen.y > 20) &&
                ((bomen.x + bomen.width) < 600) &&
                ((bomen.y + bomen.height) < 440))
            {

                switch (k)
                {
                case 0:rectangle(image02temp, bomen, Scalar(255, 0, 255), 2, 8, 0); break;
                case 1:rectangle(image02temp, bomen, Scalar(0, 255, 255), 2, 8, 0); break;
                case 2:rectangle(image02temp, bomen, Scalar(255, 0, 0), 2, 8, 0); break;
                default:rectangle(image02temp, bomen, Scalar(0, 0, 255), 2, 8, 0); break;
                }

            }

         }

        imshow("检测与跟踪", image02temp);
        gray_frameReference = gray_frameCurrent.clone();
        char c = cv::waitKey(30);
        if (c == 27) break;



    }



    Mat frame;  // frame used for tracking
    Mat result; // tracking result

    // Load the video file (place it under the program's working directory)
    //VideoCapture capture("../libo_output/output8.avi");
    //VideoCapture capture(0);
    // Check whether the capture is still open
    if (!capture.isOpened())
    {
        std::cout << "Could not open the input video." << std::endl;
    }


    while (flag) // never entered in this demo: flag stays false
    {
        capture >> frame;
        if (frame.empty())
        {
            std::cout << " < < <  End of the video  > > > " << std::endl;
            break;
        }
        tracking(frame, result);
        int c = waitKey(50);
        if ((char)c == 27)
        {
            break;
        }
    }




    return 0;
}


//********************************************************* User-defined functions *******************************//

// Help information
static void help_information()
{
    cout << "\n\t\t\t Author: 孙立波\n"
        << "\t\t\t Moving-object detection and tracking with optical flow\n"
        << "\t\t\t Current OpenCV version: " << CV_VERSION
        << "\n\n";
}
/// Keypoint-pair matching via descriptors
void match_by_descriptors(cv::Mat& src, cv::Mat& dest, std::vector<cv::Point2f>& prevPts, std::vector<cv::Point2f> &currPts, std::vector<cv::DMatch> &matches)
{
    std::vector<cv::KeyPoint> keypoints1, keypoints2;
    cv::Mat descriptors1, descriptors2;
    cv::Ptr<cv::xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create(900);
    surf->detectAndCompute(src, cv::Mat(), keypoints1, descriptors1);
    surf->detectAndCompute(dest, cv::Mat(), keypoints2, descriptors2);

    cv::FlannBasedMatcher matcher;
    matches.clear();
    matcher.match(descriptors1, descriptors2, matches);

    std::sort(matches.begin(), matches.end());

    prevPts.clear();
    currPts.clear();
    for (int i = 0; i<(int)(matches.size()*0.5); i++)
    {
        prevPts.push_back(keypoints1[matches[i].queryIdx].pt);
        currPts.push_back(keypoints2[matches[i].trainIdx].pt);
    }
    matches.resize(prevPts.size());
}
void match_by_opticalflow(cv::Mat& src, cv::Mat& dest, std::vector<cv::Point2f>& prevPts, std::vector<cv::Point2f> &currPts, std::vector<cv::DMatch> &matches)
{
    std::vector<cv::KeyPoint> keypoints;
    std::vector<cv::KeyPoint> keypoints_in_cells;

    //! process the first image to extract keypoints
    //ORB_SLAM2::ORBextractor extractor(500, 1.2, 4, 20, 7);
    //extractor(src, cv::Mat(), keypoints, cv::Mat());
    cv::Ptr<cv::FastFeatureDetector> fast = cv::FastFeatureDetector::create(20, true, cv::FastFeatureDetector::TYPE_9_16);
    fast->detect(src, keypoints);

    //! separate corners into cells
    corners_separate(keypoints, keypoints_in_cells, src.cols, src.rows, 4); // use the actual image size instead of hard-coded 640x480

    std::vector<unsigned char> status;
    std::vector<float> error;
    std::vector<cv::Point2f> prevPts_temp, currPts_temp;
    for (int i = 0; i < keypoints_in_cells.size(); i++)
    {
        prevPts_temp.push_back(keypoints_in_cells[i].pt);
    }

    //! track with pyramidal LK optical flow
    //cv::cornerSubPix(gray_reference, prevPts, cv::Size(7, 7), cv::Size(-1, -1), cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 40, 0.01));
    cv::calcOpticalFlowPyrLK(
        src, dest,
        prevPts_temp, currPts_temp,
        status, error,
        cv::Size(21, 21), 3,
        cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, 0.001)
        );

    //! check status and keep good matches
    prevPts.clear();
    currPts.clear();
    int status_num = status.size();
    for (int i = 0; i < status_num; i++)
    {
        if (status[i] == 1)
        {
            // drop points tracked to outside the image; the tracked position lives in currPts_temp
            // (the original checked prevPts_temp, which the detector guarantees to be in-bounds)
            if (currPts_temp[i].x < 0 || currPts_temp[i].x > dest.cols || currPts_temp[i].y < 0 || currPts_temp[i].y > dest.rows)
                continue;

            int new_i = static_cast<int>(prevPts.size());

            prevPts.push_back(prevPts_temp[i]);
            currPts.push_back(currPts_temp[i]);
            matches.push_back(cv::DMatch(new_i, new_i, 0));
        }
    }
}
void corners_separate(const std::vector<cv::KeyPoint>& in, std::vector<cv::KeyPoint> &out, int img_cols, int img_rows, int n)
{
    unsigned int total_keypoints = in.size();
    assert(n > 1);
    unsigned int unit_rows = img_rows / n;
    unsigned int unit_cols = img_cols / n;

    std::vector<std::vector<cv::KeyPoint>> keypoints_in_cell;
    keypoints_in_cell.resize(n*n);
    for (unsigned int i = 0; i < total_keypoints; i++)
    {
        const cv::KeyPoint kp = in[i];
        unsigned int n_row = kp.pt.y / unit_rows;
        unsigned int n_col = kp.pt.x / unit_cols;

        if (n_row >= (unsigned int)n) n_row = n - 1;
        if (n_col >= (unsigned int)n) n_col = n - 1; // clamp the column index too (the original clamped only the row)

        unsigned int num = n_row * n + n_col;
        keypoints_in_cell[num].push_back(kp);
    }

    unsigned int cell_num = n*n;
    out.clear();
    for (unsigned int i = 0; i < cell_num; i++)
    {
        unsigned int n_row = i / n;
        unsigned int n_col = i - n_row*n;
        std::vector<cv::KeyPoint> &kps = keypoints_in_cell[i];

        if (kps.size() != 0)
        {
            std::sort(kps.begin(), kps.end(), compare_response);

            out.push_back(kps[0]);
        }
        else
        {
            cv::Point2f pt((n_col + 0.5)*unit_cols, (n_row + 0.5)*unit_rows);
            cv::KeyPoint new_kp(pt, 31);

            out.push_back(new_kp);
        }
    }
}
///
void get_better_contours(std::vector<std::vector<cv::Point>> &contours)
{
    const int cmin = 10;  // minimum contour length
    const int cmax = 400; // maximum contour length
    std::vector<std::vector<cv::Point>>::const_iterator itc = contours.begin();
    while (itc != contours.end()) {
        if (itc->size() < cmin || itc->size() > cmax)
            itc = contours.erase(itc);
        else
            ++itc;
    }

    itc = contours.begin();
    while (itc != contours.end())
    {
        cv::Rect aRect = cv::boundingRect(*itc);
        double area = fabs(cv::contourArea(*itc));
        double aRectRatio1 = double(aRect.width) / double(aRect.height);
        double aRectRatio2 = double(aRect.height) / double(aRect.width);
        if ((area < 10) || (area > 4000))
        {
            itc = contours.erase(itc);
        }
        /*else if (area < 450 && ((0.9 < aRectRatio1) && (aRectRatio1 < 1.0)) || ((0.9 < aRectRatio2) && (aRectRatio2 < 1.0)))
        {
            itc = contours.erase(itc);
        }*/
        else if (aRectRatio1 > 3 || aRectRatio2 > 3)
        {
            itc = contours.erase(itc);
        }
        else ++itc;
    }

}
// Optical flow tracking module. Parameters: frame - input video frame; output - video frame with tracking results
void tracking(Mat &frame, Mat &output)
{
    cvtColor(frame, gray, COLOR_BGR2GRAY);
    frame.copyTo(output);
    // Add feature points
    vector<Point2f> features;   // detected features
    int maxCount = 500;         // maximum number of features to detect
    double qLevel = 0.01;       // quality level for feature detection
    double minDist = 10.0;      // minimum distance between two feature points
    vector<uchar> status;       // tracking status: 1 if the flow for the feature was found, 0 otherwise
    vector<float> err;
    if (points[0].size() <= 10)
    {
        goodFeaturesToTrack(gray, features, maxCount, qLevel, minDist);
        points[0].insert(points[0].end(), features.begin(), features.end());
        //initial.insert(initial.end(), features.begin(), features.end());
    }

    if (gray_prev.empty())
    {
        gray.copyTo(gray_prev);
    }
    // Lucas-Kanade optical flow motion estimation
    calcOpticalFlowPyrLK(gray_prev, gray, points[0], points[1], status, err);
    // Remove bad feature points
    int k = 0;
    for (size_t i = 0; i<points[1].size(); i++)
    {
        if (status[i] && (abs(points[0][i].x - points[1][i].x) + abs(points[0][i].y - points[1][i].y)) > 10) // keep points whose flow was found and that moved more than 10 px
        {
            //initial[k++] = initial[i];
            points[1][k++] = points[1][i];
        }
    }
    points[1].resize(k);
    initial.resize(k);
    // Draw feature points and motion trajectories
    for (size_t i = 0; i<points[1].size(); i++)
    {
        //line(output, initial[i], points[1][i], Scalar(0, 0, 255));
        circle(output, points[1][i], 3, Scalar(0, 255, 0), -1);
    }

    // Use the current result as the reference for the next frame
    swap(points[1], points[0]);
    swap(gray_prev, gray);
    imshow("optical flow tracking", output);
}
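To summarize the second program, its registration-and-differencing core (homography from matched points, warp, absolute difference, threshold, contours) can be condensed into one function. This is an illustrative distillation under the same assumptions as the code above (grayscale inputs, the threshold of 20, the 4-match minimum for findHomography), not a drop-in replacement; `detect_moving_regions` is a hypothetical helper name.

#include <opencv2/opencv.hpp>
#include <vector>

// Estimate a homography between matched point sets, warp the reference
// frame onto the current one, and return bounding boxes of the regions
// that still differ (i.e., candidate moving objects).
std::vector<cv::Rect> detect_moving_regions(
    const cv::Mat& gray_ref, const cv::Mat& gray_cur,
    const std::vector<cv::Point2f>& refPts,
    const std::vector<cv::Point2f>& curPts)
{
    std::vector<cv::Rect> boxes;
    if (refPts.size() < 4 || refPts.size() != curPts.size())
        return boxes; // findHomography needs at least 4 point pairs

    // RANSAC rejects outlier matches while estimating the 3x3 mapping
    cv::Mat H = cv::findHomography(refPts, curPts, cv::RANSAC);
    if (H.empty()) return boxes;

    cv::Mat warped, diff;
    cv::warpPerspective(gray_ref, warped, H, gray_cur.size());
    cv::absdiff(gray_cur, warped, diff); // camera motion is compensated, so residuals are object motion
    cv::threshold(diff, diff, 20, 255.0, cv::THRESH_BINARY);
    cv::medianBlur(diff, diff, 3);

    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(diff, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);
    for (size_t i = 0; i < contours.size(); i++)
        boxes.push_back(cv::boundingRect(contours[i]));
    return boxes;
}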
