视觉SLAM实践入门——(19)视觉里程计之多层光流法(光流金字塔)

对于单层光流法,每次迭代求解增量方程使用了8x8窗口的所有像素点的信息,当运动较大,使得估计点在下一帧图像的位置超出了这个窗口,这时光流法就会跟丢这个点

多层光流法基于图像金字塔,通过对上层图像使用光流法得到估计值(同样的8x8窗口,但是上层图像分辨率较小,因此可以估计更大的运动),再将估计值反馈到下层作为计算的初始值,从而解决单层光流法无法估计较大运动等问题

 

多层光流法的过程

1、寻找GFTT角点

2、计算图像金字塔

3、对图像金字塔,从上至下使用单层光流法

 

 

验证时可以与opencv的光流金字塔估计的位置进行对比,opencv提供的光流金字塔接口:

void cv::calcOpticalFlowPyrLK	(	
InputArray 	prevImg,
InputArray 	nextImg,
InputArray 	prevPts,
InputOutputArray 	nextPts,
OutputArray 	status,
OutputArray 	err,
Size 	winSize = Size(21, 21),
int 	maxLevel = 3,
TermCriteria 	criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01),
int 	flags = 0,
double 	minEigThreshold = 1e-4 
)

 

 

#include <cmath>
#include <iostream>
#include <string>
#include <vector>
using namespace std;

//#include <list>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
using namespace cv;

#include <Eigen/Core>
#include <Eigen/Dense>


//State holder for Lucas-Kanade optical flow: tracks the keypoints of img1 into img2.
//calculateOpticalFlow() processes the keypoints whose indices fall in a cv::Range,
//so the class can also be driven by cv::parallel_for_ (see OpticalFlowSingleLevel).
class OpticalFlowTracker
{
public:
	OpticalFlowTracker(
		const Mat &img1,
		const Mat &img2,
		const vector<KeyPoint> &kp1,
		vector<KeyPoint> &kp2,
		vector<bool> &success,
		bool inverse = true,
		bool has_initial = false
	) : _img1(img1), _img2(img2), _kp1(kp1), _kp2(kp2), _success(success), _inverse(inverse), _has_initial(has_initial){};

	//Run Gauss-Newton flow estimation for keypoints with indices in [range.start, range.end).
	void calculateOpticalFlow(const Range &range);
		 
private:
	const Mat &_img1;				//reference frame (keypoints detected here)
	const Mat &_img2;				//target frame (positions estimated here)
	const vector<KeyPoint> &_kp1;	//input keypoints in _img1
	vector<KeyPoint> &_kp2;			//output tracked positions; also the initial guess when _has_initial
	vector<bool> &_success;			//per-keypoint flag: true if tracking converged
	bool _inverse = true;		//true: inverse method — Jacobian taken from _img1 and kept fixed over iterations
	bool _has_initial = false;	//true: _kp2 already holds an initial guess (written by the coarser pyramid level)
};


//Bilinear interpolation of a grayscale (8-bit, single channel) image at subpixel (x, y).
//BUG FIX: the original clamped only to the last row/column (cols-1 / rows-1), but the
//2x2 neighbourhood read below (data[1], data[img.step], data[img.step + 1]) then reaches
//one pixel past the image buffer — an out-of-bounds read on the bottom row / right column.
//Clamp to cols-2 / rows-2 so the whole 2x2 block stays inside the image.
//(assumes img is at least 2x2 — true for any real camera frame; TODO confirm for callers)
inline float GetPixelValue(const cv::Mat &img, float x, float y) {
    if (x < 0) x = 0;
    if (y < 0) y = 0;
    if (x >= img.cols - 1) x = img.cols - 2;
    if (y >= img.rows - 1) y = img.rows - 2;

    //pointer to the top-left pixel of the 2x2 interpolation block
    const unsigned char *data = &img.data[int(y) * img.step + int(x)];

    //fractional offsets inside the block
    float xx = x - floor(x);
    float yy = y - floor(y);

    //bilinear blend of the four surrounding pixels
    return
    float(
        (1 - xx) * (1 - yy) * data[0] +
        xx * (1 - yy) * data[1] +
        (1 - xx) * yy * data[img.step] +
        xx * yy * data[img.step + 1]
    );
}

void OpticalFlowTracker::calculateOpticalFlow(const Range &range)
{
	//8x8窗口
	int half_patch_size = 4;
	int iterations = 10;

	for(size_t i = range.start; i < range.end; i++)
	{
		auto kp = _kp1[i];
		double dx = 0, dy = 0;
		//多层光流,通过上/下层光流知道kp2[i].pt
		if(_has_initial)
		{
			dx = _kp2[i].pt.x - kp.pt.x;
			dy = _kp2[i].pt.y - kp.pt.y;
		}

		double cost = 0, lastCost = 0;
		bool success = true;

		Eigen::Matrix2d H = Eigen::Matrix2d::Zero();
		Eigen::Vector2d b = Eigen::Vector2d::Zero();
		Eigen::Vector2d J;
		for(int iter = 0; iter < iterations; iter++)
		{
			//反向光流法
            if (_inverse == false) 
                H = Eigen::Matrix2d::Zero();
			
            b = Eigen::Vector2d::Zero();
		
			cost = 0;
			//对于每个点,使用8x8窗口(-4~+3)求增量方程
			for(int x = -half_patch_size; x < half_patch_size; x++)
				for(int y = -half_patch_size; y < half_patch_size; y++)
				{
					double err = GetPixelValue(_img2, kp.pt.x + x + dx, kp.pt.y + y + dy) - 
								GetPixelValue(_img1, kp.pt.x + x, kp.pt.y + y);

					//求雅可比
                    if (_inverse == false)
                        J = 1.0 * Eigen::Vector2d(
							//dI/dx=[(x3-x2)+(x2-x1)] / 2
                            0.5 * (GetPixelValue(_img2, kp.pt.x + dx + x + 1, kp.pt.y + dy + y) -
                                   GetPixelValue(_img2, kp.pt.x + dx + x - 1, kp.pt.y + dy + y)),
                            0.5 * (GetPixelValue(_img2, kp.pt.x + dx + x, kp.pt.y + dy + y + 1) -
                                   GetPixelValue(_img2, kp.pt.x + dx + x, kp.pt.y + dy + y - 1))
						);
                    else if (iter == 0)    //反向光流法
                        J = 1.0 * Eigen::Vector2d(
                            0.5 * (GetPixelValue(_img1, kp.pt.x + x + 1, kp.pt.y + y) -
                                   GetPixelValue(_img1, kp.pt.x + x - 1, kp.pt.y + y)),
                            0.5 * (GetPixelValue(_img1, kp.pt.x + x, kp.pt.y + y + 1) -
                                   GetPixelValue(_img1, kp.pt.x + x, kp.pt.y + y - 1))
                        );					
					
					//求H、b
					cost += err*err;
					b += -err*J;
                    //反向光流法
					if (_inverse == false || iter == 0)
						H += J*J.transpose();
				}
			//解增量方程
			Eigen::Vector2d update = H.ldlt().solve(b);

			if(isnan(update[0]))
			{
				cout << (int)i << "\t:isnan" << endl;
				success = false;			
				break;
			}

			if(iter > 0 && cost > lastCost)			
				break;			
				
			dx += update[0];
			dy += update[1];
			lastCost = cost;
			success = true;

			if(update.norm() < 1e-2)		
				break;																				
		}		
		
		_success[i] = success;
		_kp2[i].pt = kp.pt + Point2f(dx, dy);
	}

}

void OpticalFlowSingleLevel(const Mat &img1, const Mat &img2,
	const vector<KeyPoint> &kp1, vector<KeyPoint> &kp2, vector<bool> &success, bool inverse = false, bool has_initial = false)
{
	kp2.resize(kp1.size());
	success.resize(kp1.size());
	OpticalFlowTracker tracker(img1, img2, kp1, kp2, success, inverse, has_initial);
	//并行计算函数,参数:循环列表,函数调用(这里使用了绑定,参数:函数调用,对象,绑定参数列表(不需要绑定的使用占位符_1、_2、…))
	//opencv3.2.0好像是不支持给parallel_for_传入bind,需要升级opencv4的原因可能在这里
	//测试发现,源码中使用并行运算会出现一些随机错误,推测是多线程之间数据共享的原因
	//parallel_for_(Range(0, kp1.size()),
	//				std::bind(&OpticalFlowTracker::calculateOpticalFlow, &tracker, placeholders::_1));
		
	//不使用并行计算
	tracker.calculateOpticalFlow(Range(0, kp1.size()));
}


//Coarse-to-fine LK optical flow over an image pyramid.
//The flow estimated at a coarser level seeds the next finer level, letting the
//fixed 8x8 window handle much larger motions than a single level can.
//Tracked positions (at full resolution) are appended to kp2.
void OpticalFlowMultiLevel(const Mat &img1, const Mat &img2,
	const vector<KeyPoint> &kp1, vector<KeyPoint> &kp2, vector<bool> &success, bool inverse = false)
{
	int pyramids = 4;			//number of pyramid levels
	double pyramid_scale = 0.5;	//size ratio between adjacent levels

	//build the image pyramids (level 0 = full resolution)
	vector<Mat> img1_pyr, img2_pyr;
	img1_pyr.push_back(img1);
	img2_pyr.push_back(img2);
	for(int p = 1; p < pyramids; p++)
	{
		Mat img1_tmp, img2_tmp;
		cv::resize(img1_pyr[p-1], img1_tmp,
			cv::Size(img1_pyr[p-1].cols*pyramid_scale, img1_pyr[p-1].rows*pyramid_scale));
		img1_pyr.push_back(img1_tmp);
		cv::resize(img2_pyr[p-1], img2_tmp,
			cv::Size(img2_pyr[p-1].cols*pyramid_scale, img2_pyr[p-1].rows*pyramid_scale));
		img2_pyr.push_back(img2_tmp);
		/*
		imshow("1", img1_pyr[p]);
		waitKey(0);
		*/
	}

	//scale of the coarsest level relative to level 0.
	//FIX: the original used a hard-coded table scales[] = {1, 0.5, 0.25, 0.125}
	//(only scales[pyramids-1] was ever read) that would silently break if
	//`pyramids` or `pyramid_scale` changed; compute it instead.
	double top_scale = pow(pyramid_scale, pyramids - 1);

	//project the keypoints onto the coarsest level; kp2 starts at the same positions
	vector<KeyPoint> kp1_pyr, kp2_pyr;
	for(auto kp_tmp : kp1)
	{
		kp_tmp.pt = kp_tmp.pt * top_scale;
		kp1_pyr.push_back(kp_tmp);
		kp2_pyr.push_back(kp_tmp);
	}

	//coarse-to-fine: run single-level flow, then upscale the estimates for the next level
	for(int i = pyramids - 1; i >= 0; i--)
	{
		success.clear();
		//has_initial = true: kp2_pyr carries the guess from the coarser level
		OpticalFlowSingleLevel(img1_pyr[i], img2_pyr[i], kp1_pyr, kp2_pyr, success, inverse, true);
		if(i > 0)
		{
			for(auto &kp_tmp : kp1_pyr)
				kp_tmp.pt = kp_tmp.pt / pyramid_scale;
			for(auto &kp_tmp : kp2_pyr)
				kp_tmp.pt = kp_tmp.pt / pyramid_scale;
		}
	}

	//hand back the tracked positions from the finest level
	for(auto &kp_tmp : kp2_pyr)
		kp2.push_back(kp_tmp);
}

int main(int argc, char **argv)
{
	if(argc != 3)
	{
		cout << "err" << endl;
		return -1;
	}
	
	//读取灰度图
	Mat img1 = imread(argv[1], IMREAD_GRAYSCALE);
	Mat img2 = imread(argv[2], IMREAD_GRAYSCALE);
	
	//检测GFTT角点,最多500个,最小特征值0.01,最小距离20
	vector<KeyPoint> kp1;
	Ptr<GFTTDetector> detector = GFTTDetector::create(500, 0.01, 20);
	detector->detect(img1, kp1);

	//使用单层光流法跟踪img1的特征点在img2的位置,success_single标记特征点是否跟丢
	vector<KeyPoint> kp2_single;
	vector<bool> success_single;
	OpticalFlowSingleLevel(img1, img2, kp1, kp2_single, success_single);
	
	//使用多层光流法跟踪
	vector<KeyPoint> kp2_multi;
	vector<bool> success_multi;	
	OpticalFlowMultiLevel(img1, img2, kp1, kp2_multi, success_multi, true);

	//使用opencv的光流法
	vector<Point2f> pt1, pt2;
	for(auto kp:kp1)
		pt1.push_back(kp.pt);
	vector<unsigned char> status_cv;
	vector<float> error_cv;
	cv::calcOpticalFlowPyrLK(img1, img2, pt1, pt2, status_cv, error_cv);


	//跟踪前后坐标信息
	for(int i = 0; i < (int)kp1.size(); i++)
	{
		Mat p1 = (Mat_<double>(1, 2) << kp1[i].pt.x, kp1[i].pt.y);
		Mat p2 = (Mat_<double>(1, 2) << kp2_single[i].pt.x, kp2_single[i].pt.y);
		Mat p2_multi = (Mat_<double>(1, 2) << kp2_multi[i].pt.x, kp2_multi[i].pt.y);		
		
		cout << i << "\t:" << p1 << " and " << p2 << " and " << p2_multi << " and " << pt2[i] << endl;
	}	
/*
	//跟丢的点被标记为 0
	//从这里可以看出,源码使用并行计算会随机跟丢一些点,opencv库的光流法并不会有这种问题,将源码程序改为不使用并行计算,也不会跟丢
	for(int i = 0; i < (int)kp1.size(); i++)
		cout << i << "\t:" << success_single[i] << " and " << success_multi[i] << " and " << (int)status_cv[i] << endl;
*/
	
	//画点
	Mat img_single;
	cv::cvtColor(img2, img_single, COLOR_GRAY2BGR);
	for(int i = 0; i < (int)kp1.size(); i++)
	{
		if(success_single[i])
		{
            cv::circle(img_single, kp2_single[i].pt, 2, cv::Scalar(0, 250, 0), 2);
			//从kp1指向kp2
            cv::line(img_single, kp1[i].pt, kp2_single[i].pt, cv::Scalar(0, 250, 0));		
		}
	}
	
	Mat img_multi;
	cv::cvtColor(img2, img_multi, COLOR_GRAY2BGR);
	for(int i = 0; i < (int)kp1.size(); i++)
	{
		if(success_multi[i])
		{
            cv::circle(img_multi, kp2_multi[i].pt, 2, cv::Scalar(0, 250, 0), 2);
			//从kp1指向kp2
            cv::line(img_multi, kp1[i].pt, kp2_multi[i].pt, cv::Scalar(0, 250, 0));		
		}
	}
	
	Mat img_cv;
	cv::cvtColor(img2, img_cv, COLOR_GRAY2BGR);
	for(int i = 0; i < (int)pt1.size(); i++)
	{
		if(status_cv[i])
		{
            cv::circle(img_cv, pt2[i], 2, cv::Scalar(0, 250, 0), 2);
			//从kp1指向kp2
            cv::line(img_cv, pt1[i], pt2[i], cv::Scalar(0, 250, 0));		
		}
	}

	imshow("single optical flow", img_single);
	imshow("multi optical flow", img_multi);
	imshow("cv optical flow", img_cv);


	waitKey(0);

	return 0;
}

 

 

 

  • 3
    点赞
  • 7
    收藏
    觉得还不错? 一键收藏
  • 2
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值